library(tidyverse)
library(ggdist)
library(ggside)
library(easystats)
library(patchwork)
library(brms)

# Signed log transform: compresses magnitude while preserving sign.
# log1p(abs(x)) is numerically more accurate than log(1 + abs(x)) for small |x|.
logmod <- function(x) sign(x) * log1p(abs(x))
# Signed square-root transform: compresses magnitude, keeps the sign.
sqrtmod <- function(x) {
  magnitude <- sqrt(abs(x))
  sign(x) * magnitude
}
# Signed cube-root transform. `^` is the idiomatic R power operator
# (`**` is an undocumented alias that the parser translates to `^`).
cbrtmod <- function(x) sign(x) * (abs(x)^(1 / 3))

# Load the preprocessed perceptual data. Block and Illusion_Side are
# categorical codes, so both are converted to factors in a single step.
perceptual <- read.csv("../data/preprocessed_perceptual.csv") |>
  mutate(across(c(Block, Illusion_Side), as.factor))

Ebbinghaus

Error Rate

# Ebbinghaus trials only (errors kept in for the error-rate model).
data <- perceptual |>
  filter(Illusion_Type == "Ebbinghaus")

Descriptive

# Descriptive error-rate plot: a normalized histogram of error trials over
# task difficulty, with a binomial-GAM smooth of error probability per
# illusion side. `data` must contain Illusion_Difference, Error (0/1),
# and Illusion_Side (factor with levels "-1" / "1").
plot_desc_errors <- function(data) {
  data |>
    ggplot(aes(x = Illusion_Difference)) +
    # Histogram of error trials only, as a proportion of all errors.
    # after_stat() replaces the deprecated ..count.. notation
    # (consistent with the after_stat() usage elsewhere in this file).
    geom_histogram(data = filter(data, Error == 1),
                   aes(y = after_stat(count / sum(count)),
                       fill = Illusion_Side),
                   binwidth = diff(range(data$Illusion_Difference)) / 20,
                   color = "white") +
    # GAM smooth of the raw 0/1 errors, fit per illusion side.
    geom_smooth(aes(y = Error, color = Illusion_Side),
                method = 'gam',
                formula = y ~ s(x, bs = "cs"),
                method.args = list(family = "binomial")) +
    scale_x_continuous(expand = c(0, 0)) +
    scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
    scale_color_manual(values = c("-1" = "#FF5722", "1" = "#43A047")) +
    scale_fill_manual(values = c("-1" = "#FF5722", "1" = "#43A047")) +
    coord_cartesian(ylim = c(0, 1), xlim = range(data$Illusion_Difference)) +
    labs(x = "Task Difficulty", y = "Probability of Error") +
    theme_modern()
}

plot_desc_errors(data)

Model Selection

# Fit `y ~ f(Illusion_Difference) + (1|Participant)` for several monotone
# transformations f, and compare the fits via Bayes factors.
# `y` selects the response: "RT" uses a gaussian family, anything else
# (here "Error") a binomial family.
# Returns the comparison table sorted by descending BF.
test_models <- function(data, y = "RT") {
  # TODO: add random effect
  # Family depends only on the response, so decide it once outside the
  # loop (scalar `if`/`else` rather than `ifelse()`, which is for vectors).
  family <- if (y == "RT") "gaussian" else "binomial"
  predictors <- c("Illusion_Difference",
                  "logmod(Illusion_Difference)",
                  "sqrtmod(Illusion_Difference)",
                  "cbrtmod(Illusion_Difference)")
  models <- list()
  for (f in predictors) {
    models[[f]] <- glmmTMB::glmmTMB(
      as.formula(paste0(y, " ~ ", f, "+ (1|Participant)")),
      data = data,
      family = family
    )
  }
  performance::test_performance(models) |>
    data_select(-Model) |>
    arrange(desc(BF))
}

insight::display(test_models(data, "Error"))
Name BF
logmod(Illusion_Difference) 0.911
sqrtmod(Illusion_Difference) 0.722
cbrtmod(Illusion_Difference) 0.644
Illusion_Difference

Each model is compared to logmod(Illusion_Difference).

Model Specification

# Error-rate model (Ebbinghaus): Bernoulli mixed model with random
# intercepts and random difficulty slopes per participant.
# NOTE(review): the selection table above compared transformed predictors
# (all BFs close to 1); the raw Illusion_Difference is used here --
# confirm this choice is intentional.
formula <- brms::bf(
  Error ~ Illusion_Difference +
    (1 + Illusion_Difference | Participant),
  family = "bernoulli"
)

# brms::get_prior(formula, data = data)
# brms::validate_prior(formula)

# Fit with brms defaults (default priors, 4 chains per the log below);
# refresh = 0 suppresses the per-iteration sampler progress output.
perceptual_ebbinghaus_err <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 2.0 seconds.
## Chain 1 finished in 2.1 seconds.
## Chain 2 finished in 2.2 seconds.
## Chain 4 finished in 2.3 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 2.1 seconds.
## Total execution time: 2.4 seconds.

# Cache the fitted model so later sections can load() it instead of refitting.
save(perceptual_ebbinghaus_err, file="models/perceptual_ebbinghaus_err.Rdata")

Model Inspection

load("models/perceptual_ebbinghaus_err.Rdata")

# Model-inspection plot: overlay posterior predictions (median line and
# CI ribbon from estimate_relation) on the histogram of error trials.
plot_model_errors <- function(data, model) {
  # 100-point prediction grid over the predictor range.
  pred <- estimate_relation(model, length = 100)

  data |>
    ggplot(aes(x = Illusion_Difference)) +
    # after_stat() replaces the deprecated ..count.. notation.
    geom_histogram(data = filter(data, Error == 1),
                   aes(y = after_stat(count / sum(count))),
                   binwidth = diff(range(data$Illusion_Difference)) / 20) +
    geom_ribbon(data = pred,
                aes(ymin = CI_low, ymax = CI_high),
                alpha = 1/3, fill = "red") +
    geom_line(data = pred,
              aes(y = Predicted),
              color = "red") +
    scale_x_continuous(expand = c(0, 0)) +
    scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
    coord_cartesian(ylim = c(0, 1), xlim = range(data$Illusion_Difference)) +
    labs(x = "Task Difficulty", y = "Probability of Error") +
    theme_modern()
}

plot_model_errors(data, perceptual_ebbinghaus_err)

Model Performance

# Conditional R2, marginal R2, and ICC for the Ebbinghaus error model.
performance::performance(perceptual_ebbinghaus_err, metrics = c("R2", "ICC")) |> 
  display()
R2 R2 (marg.) ICC
0.10 0.01 0.57

Response Time

# Correct Ebbinghaus trials only, for the response-time analyses.
data <- perceptual |>
  filter(Illusion_Type == "Ebbinghaus", Error == 0)

Descriptive

# Descriptive RT plot: a 2-D density raster of response time over task
# difficulty, GAM smooths per illusion side, and marginal RT densities
# on the side panel (ggside).
plot_desc_rt <- function(data) {
  data |>
    ggplot(aes(x = Illusion_Difference, y = RT)) +
    # ggpointdensity::geom_pointdensity(size = 3, alpha=0.5) +
    # scale_color_gradientn(colors = c("grey", "black"), guide = "none") +
    # ggnewscale::new_scale_color() +
    # after_stat() replaces the deprecated ..density.. notation.
    stat_density_2d(aes(fill = after_stat(density)), geom = "raster", contour = FALSE) +
    scale_fill_gradientn(colors = c("white", "black"), guide = "none") +
    ggnewscale::new_scale_fill() +
    geom_smooth(aes(color = Illusion_Side, fill = Illusion_Side),
                method = 'gam',
                formula = y ~ s(x, bs = "cs")) +
    scale_color_manual(values = c("-1" = "#FF5722", "1" = "#43A047")) +
    scale_fill_manual(values = c("-1" = "#FF5722", "1" = "#43A047")) +
    # Illusion_Difference is continuous; scale_x_discrete() here was a bug
    # (cf. plot_desc_errors, which uses the continuous scale).
    scale_x_continuous(expand = c(0, 0)) +
    scale_y_continuous(expand = c(0, 0)) +
    labs(x = "Task Difficulty", y = "Response Time (s)") +
    theme_modern() +
    ggside::geom_ysidedensity(aes(fill = Illusion_Side), color = NA, alpha = 0.3) +
    ggside::theme_ggside_void() +
    ggside::scale_ysidex_continuous(expand = c(0, 0)) +
    ggside::ggside()
}

plot_desc_rt(data)

Model Selection

# Transformation comparison for RT (gaussian mixed models).
insight::display(test_models(data, "RT"))
Name BF
cbrtmod(Illusion_Difference) 2.01
sqrtmod(Illusion_Difference) 1.72
logmod(Illusion_Difference) 1.18
Illusion_Difference

Each model is compared to cbrtmod(Illusion_Difference).

Model Specification

# TODO: shift to lognormal
# RT model (Ebbinghaus): gaussian mixed model of response time.
# NOTE(review): selection above weakly favored cbrtmod(Illusion_Difference)
# (BF ~ 2); the raw predictor is used here -- confirm this is intentional.
formula <- brms::bf(
  RT ~ Illusion_Difference +
    (1 + Illusion_Difference | Participant)
)

# brms::get_prior(formula, data = data)
# brms::validate_prior(formula)

# Fit with brms defaults; refresh = 0 suppresses progress output.
perceptual_ebbinghaus_rt <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 3 finished in 2.6 seconds.
## Chain 1 finished in 2.8 seconds.
## Chain 4 finished in 2.7 seconds.
## Chain 2 finished in 2.8 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 2.7 seconds.
## Total execution time: 2.9 seconds.

# Cache the fitted model for later load().
save(perceptual_ebbinghaus_rt, file="models/perceptual_ebbinghaus_rt.Rdata")

Model Inspection

load("models/perceptual_ebbinghaus_rt.Rdata")

# Model-inspection plot: posterior predictions (median + CI ribbon) over
# a 2-D density raster of the observed RTs.
plot_model_rt <- function(data, model) {
  # 100-point prediction grid over the predictor range.
  pred <- estimate_relation(model, length = 100)

  data |>
    ggplot(aes(x = Illusion_Difference)) +
    # after_stat() replaces the deprecated ..density.. notation.
    stat_density_2d(aes(fill = after_stat(density), y = RT), geom = "raster", contour = FALSE) +
    scale_fill_gradientn(colors = c("white", "black"), guide = "none") +
    ggnewscale::new_scale_fill() +
    geom_ribbon(data = pred,
                aes(ymin = CI_low, ymax = CI_high),
                alpha = 1/3, fill = "red") +
    geom_line(data = pred,
              aes(y = Predicted),
              color = "red") +
    scale_x_continuous(expand = c(0, 0)) +
    scale_y_continuous(expand = c(0, 0)) +
    labs(x = "Task Difficulty", y = "Response Time (s)") +
    theme_modern()
}

plot_model_rt(data, perceptual_ebbinghaus_rt)

Model Performance

# Conditional R2, marginal R2, and ICC for the Ebbinghaus RT model.
performance::performance(perceptual_ebbinghaus_rt, metrics = c("R2", "ICC")) |> 
  display()
R2 R2 (marg.) ICC
0.27 0.04 0.38

Müller-Lyer

Error Rate

# Müller-Lyer trials (errors kept in for the error-rate model).
data <- perceptual |>
  filter(Illusion_Type == "MullerLyer")

Descriptive

# Shared descriptive error-rate plot (defined in the Ebbinghaus section).
plot_desc_errors(data)

Model Selection

# Transformation comparison for Error (binomial mixed models).
insight::display(test_models(data, "Error"))
Name BF
logmod(Illusion_Difference) 0.955
sqrtmod(Illusion_Difference) 0.778
cbrtmod(Illusion_Difference) 0.684
Illusion_Difference

Each model is compared to logmod(Illusion_Difference).

Model Specification

# Error-rate model (Müller-Lyer): same Bernoulli specification as for
# the Ebbinghaus illusion (random intercepts + difficulty slopes).
# NOTE(review): raw predictor used despite the transformed candidates
# above (all BFs near 1) -- confirm intentional.
formula <- brms::bf(
  Error ~ Illusion_Difference +
    (1 + Illusion_Difference | Participant),
  family = "bernoulli"
)

# brms::get_prior(formula, data = data)
# brms::validate_prior(formula)

# Fit with brms defaults; refresh = 0 suppresses progress output.
perceptual_mullerlyer_err <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 1.8 seconds.
## Chain 3 finished in 1.8 seconds.
## Chain 2 finished in 2.0 seconds.
## Chain 4 finished in 2.2 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 2.0 seconds.
## Total execution time: 2.3 seconds.

# Cache the fitted model for later load().
save(perceptual_mullerlyer_err, file="models/perceptual_mullerlyer_err.Rdata")

Model Inspection

load("models/perceptual_mullerlyer_err.Rdata")
# Shared model-inspection plot (defined in the Ebbinghaus section).
plot_model_errors(data, perceptual_mullerlyer_err)

Model Performance

# Conditional R2, marginal R2, and ICC for the Müller-Lyer error model.
performance::performance(perceptual_mullerlyer_err, metrics = c("R2", "ICC")) |> 
  display()
R2 R2 (marg.) ICC
0.10 0.04 0.42

Response Time

# Correct Müller-Lyer trials only, for the response-time analyses.
data <- perceptual |>
  filter(Illusion_Type == "MullerLyer", Error == 0)

Descriptive

# Shared descriptive RT plot (defined in the Ebbinghaus section).
plot_desc_rt(data)

Model Selection

# Transformation comparison for RT (gaussian mixed models).
insight::display(test_models(data, "RT"))
Name BF
cbrtmod(Illusion_Difference) 93.08
sqrtmod(Illusion_Difference) 37.00
logmod(Illusion_Difference) 3.09
Illusion_Difference

Each model is compared to cbrtmod(Illusion_Difference).

Model Specification

# TODO: shift to lognormal
# RT model (Müller-Lyer): uses the cube-root-transformed predictor, in
# line with the model selection above (BF ~ 93 for cbrtmod).
formula <- brms::bf(
  RT ~ cbrtmod(Illusion_Difference) +
    (1 + cbrtmod(Illusion_Difference) | Participant)
)

# brms::get_prior(formula, data = data)
# brms::validate_prior(formula)

# Fit with brms defaults; refresh = 0 suppresses progress output.
perceptual_mullerlyer_rt <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 4 finished in 3.7 seconds.
## Chain 2 finished in 4.0 seconds.
## Chain 3 finished in 4.1 seconds.
## Chain 1 finished in 4.4 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 4.0 seconds.
## Total execution time: 4.5 seconds.

# Cache the fitted model for later load().
save(perceptual_mullerlyer_rt, file="models/perceptual_mullerlyer_rt.Rdata")

Model Inspection

load("models/perceptual_mullerlyer_rt.Rdata")
# Shared model-inspection plot (defined in the Ebbinghaus section).
plot_model_rt(data, perceptual_mullerlyer_rt)

Model Performance

# Conditional R2, marginal R2, and ICC for the Müller-Lyer RT model.
performance::performance(perceptual_mullerlyer_rt, metrics = c("R2", "ICC")) |> 
  display()
R2 R2 (marg.) ICC
0.36 0.16 0.72

Vertical-Horizontal

Error Rate

# Vertical-horizontal trials (errors kept in for the error-rate model).
data <- perceptual |>
  filter(Illusion_Type == "VerticalHorizontal")

Descriptive

# Shared descriptive error-rate plot (defined in the Ebbinghaus section).
plot_desc_errors(data)

Model Selection

# Transformation comparison for Error (binomial mixed models).
insight::display(test_models(data, "Error"))
Name BF
logmod(Illusion_Difference) 0.902
sqrtmod(Illusion_Difference) 0.550
cbrtmod(Illusion_Difference) 0.435
Illusion_Difference

Each model is compared to logmod(Illusion_Difference).

Model Specification

# Error-rate model (vertical-horizontal): same Bernoulli specification
# as for the other illusions (random intercepts + difficulty slopes).
# NOTE(review): raw predictor used despite the transformed candidates
# above (all BFs near or below 1) -- confirm intentional.
formula <- brms::bf(
  Error ~ Illusion_Difference +
    (1 + Illusion_Difference | Participant),
  family = "bernoulli"
)

# brms::get_prior(formula, data = data)
# brms::validate_prior(formula)

# Fit with brms defaults; refresh = 0 suppresses progress output.
perceptual_verticalhorizontal_err <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 2.1 seconds.
## Chain 3 finished in 2.1 seconds.
## Chain 4 finished in 2.2 seconds.
## Chain 2 finished in 2.5 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 2.2 seconds.
## Total execution time: 2.6 seconds.

# Cache the fitted model for later load().
save(perceptual_verticalhorizontal_err, file="models/perceptual_verticalhorizontal_err.Rdata")

Model Inspection

load("models/perceptual_verticalhorizontal_err.Rdata")
# Shared model-inspection plot (defined in the Ebbinghaus section).
plot_model_errors(data, perceptual_verticalhorizontal_err)

Model Performance

# Conditional R2, marginal R2, and ICC for the vertical-horizontal error model.
performance::performance(perceptual_verticalhorizontal_err, metrics = c("R2", "ICC")) |> 
  display()
R2 R2 (marg.) ICC
0.12 0.02 0.59

Response Time

# Correct vertical-horizontal trials only, for the response-time analyses.
data <- perceptual |>
  filter(Illusion_Type == "VerticalHorizontal", Error == 0)

Descriptive

# Shared descriptive RT plot (defined in the Ebbinghaus section).
plot_desc_rt(data)

Model Selection

# Transformation comparison for RT (gaussian mixed models).
insight::display(test_models(data, "RT"))
Name BF
logmod(Illusion_Difference) 1.01
sqrtmod(Illusion_Difference) 0.950
cbrtmod(Illusion_Difference) 0.870
Illusion_Difference

Each model is compared to logmod(Illusion_Difference).

Model Specification

# TODO: shift to lognormal
# RT model (vertical-horizontal): gaussian mixed model of response time.
# NOTE(review): selection above was inconclusive (all BFs near 1), so
# the raw predictor is a reasonable default here.
formula <- brms::bf(
  RT ~ Illusion_Difference +
    (1 + Illusion_Difference | Participant)
)

# brms::get_prior(formula, data = data)
# brms::validate_prior(formula)

# Fit with brms defaults; refresh = 0 suppresses progress output.
perceptual_verticalhorizontal_rt <- brms::brm(formula,
  data = data,
  refresh = 0
)
## Running MCMC with 4 parallel chains...
## 
## Chain 1 finished in 2.4 seconds.
## Chain 4 finished in 2.4 seconds.
## Chain 3 finished in 2.5 seconds.
## Chain 2 finished in 2.6 seconds.
## 
## All 4 chains finished successfully.
## Mean chain execution time: 2.5 seconds.
## Total execution time: 2.7 seconds.

# Cache the fitted model for later load().
save(perceptual_verticalhorizontal_rt, file="models/perceptual_verticalhorizontal_rt.Rdata")

Model Inspection

load("models/perceptual_verticalhorizontal_rt.Rdata")
# Shared model-inspection plot (defined in the Ebbinghaus section).
plot_model_rt(data, perceptual_verticalhorizontal_rt)

Model Performance

# Conditional R2, marginal R2, and ICC for the vertical-horizontal RT model.
performance::performance(perceptual_verticalhorizontal_rt, metrics = c("R2", "ICC")) |> 
  display()
R2 R2 (marg.) ICC
0.38 0.05 0.54

Individual Scores

# Extract per-participant random effects from a fitted brms model, plot
# their inter-individual variability, and return them in wide format.
# `model` is a brmsfit; `illusion` labels the output score columns.
# Returns a list with `scores` (wide data frame, one row per participant)
# and `p` (a ggplot caterpillar plot).
get_scores <- function(model, illusion="Ebbinghaus") {
  # Response name ("Error" or "RT") -- used to suffix the score columns.
  # (Renamed from `family`, which it is not.)
  response <- insight::find_response(model)
  scores <- modelbased::estimate_grouplevel(model) |> 
    data_filter(str_detect(Level, "Participant")) |> 
    # Strip transformation wrappers (cbrtmod/sqrtmod/logmod/abs) so the
    # parameter labels are uniform across models.
    mutate(Group = str_remove(Group, ": Participant"),
           Level = str_remove(Level, "Participant."),
           Group = str_remove_all(Group, "cbrtmod"),
           Group = str_remove_all(Group, "sqrtmod"),
           Group = str_remove_all(Group, "logmod"),
           Group = str_remove_all(Group, "abs"),
           Group = str_replace(Group, "Illusion_Difference", "Diff")) 
  
  # Caterpillar plot: participant-level medians with credible intervals,
  # faceted by parameter, with marginal densities per parameter (ggside).
  p <- scores |> 
    ggplot(aes(x = Median, y = Level)) +
    geom_pointrange(aes(xmin = CI_low, xmax = CI_high, color = Group)) +
    scale_color_flat_d() +
    scale_fill_flat_d() +
    labs(y = "Participants") +
    theme_modern() +
    # "outside" fixes the original typo "oustide" (an invalid value,
    # silently ignored by theme()).
    theme(strip.placement = "outside",
          axis.title.x = element_blank(),
          axis.text.y = element_blank()) +
    ggside::geom_xsidedensity(aes(fill=Group, y = after_stat(scaled)), color = NA, alpha = 0.3) +
    ggside::theme_ggside_void() +
    ggside::scale_xsidey_continuous(expand = c(0, 0)) +
    ggside::ggside() +
    facet_grid(~Group, switch = "both", scales = "free")  +
    ggtitle(paste(illusion, "-", response))
  
  # Wide format: one row per participant, illusion- and response-specific
  # column names (e.g. Perception_Ebbinghaus_Difficulty_Error).
  scores <- scores |>
    select(Group, Participant = Level, Median) |> 
    pivot_wider(names_from = "Group", values_from = "Median") |> 
    data_rename("Diff", 
                paste0("Perception_", illusion, "_Difficulty_", response)) |> 
    data_rename("Intercept", 
                paste0("Perception_", illusion, "_Intercept_", response)) 
  list(scores = scores, p = p)
}

ebbinghaus_err <- get_scores(perceptual_ebbinghaus_err, illusion="Ebbinghaus")
ebbinghaus_rt <- get_scores(perceptual_ebbinghaus_rt, illusion="Ebbinghaus")
mullerlyer_err <- get_scores(perceptual_mullerlyer_err, illusion="MullerLyer")
mullerlyer_rt <- get_scores(perceptual_mullerlyer_rt, illusion="MullerLyer")
verticalhorizontal_err <- get_scores(perceptual_verticalhorizontal_err, illusion="VerticalHorizontal")
verticalhorizontal_rt <- get_scores(perceptual_verticalhorizontal_rt, illusion="VerticalHorizontal")

# Assemble a 3x2 grid (rows = illusions; columns = error / RT scores)
# with a single collected legend and a bold centered title.
p <- (ebbinghaus_err$p + ebbinghaus_rt$p) /
  (mullerlyer_err$p + mullerlyer_rt$p) /
  (verticalhorizontal_err$p + verticalhorizontal_rt$p) +
  plot_layout(guides = "collect") +
  plot_annotation(title = "Inter- and Intra- Variability of Perceptual Scores", theme = theme(plot.title = element_text(face = "bold", hjust = 0.5))) 
p


# Join all six score tables on Participant (left-associative merges,
# identical to the original chained merge() calls) and export.
score_tables <- list(
  ebbinghaus_err$scores,
  ebbinghaus_rt$scores,
  mullerlyer_err$scores,
  mullerlyer_rt$scores,
  verticalhorizontal_err$scores,
  verticalhorizontal_rt$scores
)
scores <- Reduce(function(left, right) merge(left, right, by = "Participant"),
                 score_tables)

write.csv(scores, "../data/scores_perceptual.csv", row.names = FALSE)